brms Custom Family: nbinom_type1.R

Negative Binomial distribution parameterized by mean (mu) and overdispersion parameter (theta). This parameterization is referred to as NEGBIN type I (Cameron and Trivedi, 1998), as cited by https://doi.org/10.1080/03610926.2018.1563164

x ~ nbinom_type1(mu, theta), where E(x) = mu, Var(x) = (theta + 1) mu. This should not be confused with the mu and shape parameterization of nbinom in R, or with the 'alternative' NB (neg_binomial_2_...) in Stan. Note: we use disp instead of theta because using theta gives the error "Error: Currently 'dirichlet' is the only valid prior for simplex parameters. See help(set_prior) for more details." when trying to fit the model.
Earlier work generated poor estimates of
x0.
Visualization of data and model fit indicates there’s very little information on x0.
While I can generate predictions of expected values, I can’t
generate expected values of the data itself. I expect this is due to
fact that we generate parameters which result in y =
NaN
TODO
Figure out better model definition that avoids generating NaN
values. I expect this can be done by imposing a better prior on
x0|male.
Allow disp to vary between males.
On 3/22/2023 I added the missing change-of-variables term \(|d g^{-1}(\mathrm{disp})/d\,\mathrm{disp}| = |-\mu/\mathrm{disp}^2| = \mu/\mathrm{disp}^2\) to the log-likelihood function.

## Insights
When the disp (dispersion, i.e. theta) gets
unrealistically large, we get the emergence of a bimodal distribution at
both ends of the x0 values.
Even though we included this value, it is very unlikely to be 25C
values. I interpret this to mean that when things are really noisy (high
theta), one way to interpret the data is that one set of
males has a very long (presumably slow) decline. It would be good to
look at the correlations via pairs().
To me this is consistent with the informal knowledge that the
[rest of sentence missing in source].
Two males have fitting issues, "T235" and "T236". This appears to
be due to a bimodal posterior surface where one region has a low 'x0'
(< 40C) but low 'y0', and the other has a high x0 and
low y0.
## load libraries
library(MASS) # provides negative binomial fitting: glm.nb
library(stats)
library(tidyverse)
library(brms)
library(loo)
library(ggplot2)
#library(tidybayes)
library(ggpubr)
library(grid)
library(gridExtra)
library(ragg)
library(GGally)
library(cowplot)
library(bayesplot)
## theme_default() comes from brms/bayesplot (loaded above)
ggplot2::theme_set(theme_default(base_size = 10))
#ggplot2::theme_set(theme_default(plot.background = element_rect(color = "black")))
library(broom)
library(viridisLite)
library(cmdstanr)
library(rstan)
## Use all but two cores for parallel chain sampling
options(mc.cores = (parallel::detectCores()-2))
rstan_options(auto_write = TRUE)
## options(ggplot2.continuous.colour="viridis",
## ggplot2.discrete.colour="viridis",
## ggplot2.scale_fill_discrete = scale_fill_viridis_d,
## ggplot2.scale_fill_continuous = scale_fill_viridis_c)
library(reshape2)
library(lme4)
library(latex2exp)
## Project-local helpers; presumably provides calc_fit_lp_stats(),
## keep_chains_top(), clean_var_names(), last_plot_save(), verify_brmsfit(),
## add_column_safely(), output_dir, etc. used below -- verify against that file.
source("../Local.Functions/local.functions_ZFI.fittings.R")
#' Map a grouping flag to a general male-grouping category.
#'
#' Used below to translate the x0/y0/disp grouping flags into the broader
#' categories that drive the `switch()` on the data-mutation step.
#'
#' @param flag Character scalar: one of "uniform_1", "uniform_2", "groups_1",
#'   "groups_2", or "individual". Any other value (e.g. "groups_2a") maps
#'   to NA.
#' @return "single", "double", "individual", or NA for unrecognized flags.
which_switch_male <- function(flag) {
  ## Guard-clause chain instead of the original sequential overwrites of a
  ## local variable named `return` (which shadowed the base function).
  if (flag %in% c("uniform_1", "groups_1")) {
    return("single")
  }
  if (flag %in% c("uniform_2", "groups_2")) {
    return("double")
  }
  if (flag %in% c("individual")) {
    return("individual")
  }
  NA # default: unknown flag
}
## Custom brms family definition; presumably provides nbinom_type1() and the
## stan_nbinom_type1 Stan code string used below -- verify against that file.
source("../../../custom-brms-families/families/nbinom_type1.R")
## Collect every file in input/ and load each .Rda into the global environment.
infiles <- file.path("input", dir("input"))
print(infiles)
## [1] "input/data_ind.Rda"
## [2] "input/data.processing_2022-12-15.Rda"
## [3] "input/obs_summary_stats.Rda"
## [4] "input/stats_ind.Rda"
sapply(infiles,
load, verbose = TRUE, envir = .GlobalEnv)
## Loading objects:
## data_ind
## Loading objects:
## motif_data
## motif_data_40C
## motif_stats
## motif_stats_40C
## bird_bill_data
## Loading objects:
## summary_stats
## Loading objects:
## stats_ind
## $`input/data_ind.Rda`
## [1] "data_ind"
##
## $`input/data.processing_2022-12-15.Rda`
## [1] "motif_data" "motif_data_40C" "motif_stats" "motif_stats_40C"
## [5] "bird_bill_data"
##
## $`input/obs_summary_stats.Rda`
## [1] "summary_stats"
##
## $`input/stats_ind.Rda`
## [1] "stats_ind"
## Quick inspection of the loaded per-male summary stats and raw observations
head(stats_ind)
## # A tibble: 6 × 9
## male round n_obs total_round mean_round sd_round cv_round total mean
## <fct> <dbl> <int> <int> <dbl> <dbl> <dbl> <int> <dbl>
## 1 T234 1 13 203 40.6 32.0 0.787 601 46.2
## 2 T235 1 13 882 176. 132. 0.748 2333 179.
## 3 T236 1 13 758 152. 46.0 0.303 2095 161.
## 4 T243 1 13 438 87.6 76.4 0.872 1861 143.
## 5 T244 1 13 270 54 14.7 0.272 993 76.4
## 6 T246 1 5 253 50.6 54.6 1.08 253 50.6
names(stats_ind)
## [1] "male" "round" "n_obs" "total_round" "mean_round"
## [6] "sd_round" "cv_round" "total" "mean"
head(data_ind)
## # A tibble: 6 × 11
## male index motif_count temp_target temp round trial_round date counter
## <chr> <int> <int> <dbl> <dbl> <dbl> <dbl> <chr> <chr>
## 1 T234 1 0 42 43.0 1 1 02/03/22 RAS
## 2 T234 1 30 44 44.5 1 2 02/05/22 RAS
## 3 T234 1 34 27 27.2 1 3 02/07/22 RAS
## 4 T234 1 87 40 41.1 1 4 02/09/22 RAS
## 5 T234 1 52 35 36.1 1 5 02/11/22 RAS
## 6 T234 1 32 40 39.5 2 1 04/23/22 KIM
## # ℹ 2 more variables: y0_simple_est <dbl>, phi_ind <dbl>
names(data_ind)
## [1] "male" "index" "motif_count" "temp_target"
## [5] "temp" "round" "trial_round" "date"
## [9] "counter" "y0_simple_est" "phi_ind"
# set some variables that I expect to change below
display_plots <- FALSE
save_plots_file <- FALSE
## bayesplot color scheme for all MCMC diagnostic plots
color_scheme_set("viridis")
## Per-male motif-count summaries, cumulative over increasing temperature
## thresholds. The list is indexed by the threshold value itself, so entries
## 1..25 remain NULL by design.
cumulative_count_vs_temp <- list()
for(temp_threshold in 26:45) {
tmp <- data_ind %>% group_by(male) %>% filter(temp < temp_threshold) %>% summarize(mean = mean(motif_count), sd = sd(motif_count), var = sd^2, cv = sd(motif_count)/mean(motif_count), temp_threshold = temp_threshold)
## NOTE(review): these locals shadow base::mean and stats::sd; consider
## renaming (e.g. mean_of_means) to avoid masking inside this scope.
mean <- mean(tmp$mean)
sd <- sd(tmp$mean)
cumulative_count_vs_temp[[temp_threshold]] <- tmp
print(paste0("Temp: ", temp_threshold, ", mean: ", mean, ", sd: ", sd))
}
## NOTE(review): pairs() is base graphics (returns NULL), so last_plot() on the
## next line shows the most recent ggplot, not this pairs plot -- verify intent.
plot_pairs <- pairs(cumulative_count_vs_temp[[39]] %>% select(-c(male, sd, cv)))
if(display_plots) print(last_plot())
## NOTE(review): `hist` and `plot_grid` shadow graphics::hist and
## cowplot::plot_grid after these assignments.
hist <- ggplot(cumulative_count_vs_temp[[39]], aes(mean)) +
geom_histogram(bins = 6)
hist_log <- hist + scale_x_log10()
plot_grid <- plot_grid(plotlist = list(hist, hist_log))
If using a normal prior, go with mean = 125 and
sd = 125 * 4 = 500. However, the data doesn't seem to
follow any standard distribution, and it's at the motif rather than song
scale; as a result each male has its own, unknown scaling factor between
motifs and songs (i.e. 1/E(# motifs/song)), so a flat prior is
justifiable.
## Adapted from:
## ../2023-02-28_fit.real.data.using.nbinom_type1/brms_two.piece_fit.nbinom_type1.Rmd
## (the source filename had been fused onto the assignment line below).
## Rename to generic modeling variables: y = response count, x = temperature.
data_tbl <- data_ind %>%
rename(y = motif_count, x = temp) #%>%
#mutate(male = factor(male))
males <- unique(data_tbl$male)
nmales <- length(males)
xmax <- 46 # maximum value for x0
xignore <- 39 # x value above which data is ignored in one_piece model
## Stan function bodies (as strings) injected into the model via stanvar().
## two_piece: piecewise-linear mean -- flat at y0 below the breakpoint x0,
## then declining linearly to 0 at xmax. log(0) = -Inf when x0 > xmax,
## presumably to zero out the likelihood for invalid x0 -- verify.
## NOTE(review): `##` inside the Stan code relies on Stan's (deprecated) `#`
## line comments; `//` would be the current syntax.
stan_two_piece_func <- paste0(" real two_piece(real x, real x0, real y0) {
real xmax = ", xmax, "; ## paste in value for xmax
real y;
if(x0 > xmax) {
y = log(0);
} else {
y = y0 * (xmax - fmax(x0, x))/(xmax - x0);
}
return(y);
}\n")
## one_piece: constant mean y0 (intercept-only model)
stan_one_piece_func <- paste0(" real one_piece(real y0) {
return(y0);
}\n")
## asymptotic: mean rises from 0 at x = xmax toward y0 as x decreases,
## with rate parameter phi
stan_asymptotic_func <- paste0(" real asymp(real x, real phi, real y0) {
real xmax = ", xmax, "; ## paste in value for xmax\n
return(y0 * (1 - exp( - phi * (xmax - x))) );
}\n")
cat(stan_two_piece_func)
## real two_piece(real x, real x0, real y0) {
## real xmax = 46; ## paste in value for xmax
## real y;
##
## if(x0 > xmax) {
## y = log(0);
## } else {
## y = y0 * (xmax - fmax(x0, x))/(xmax - x0);
## }
## return(y);
## }
## Configuration of the model grid: which mean functions, sampling
## distributions, and grouping flags (for x0, y0, disp) to cross.
fit_tbl_initiate_crossed <- TRUE
models <- c("one_piece", "two_piece", "asymptotic") #, "one_piece") #, "asymptotic")
sampling_dists <- c("nbinom_type1") ##, "com_poisson") ## lognormal doesn't work since the counts can be 0.
flags_x0 <- c("uniform_1",
"uniform_2",
"groups_1", ## this doesn't work with x0_Intercept prior, suggests error in priors
"groups_2", # "groups_2a",
# "groups_2b",
"individual")
flags_y0 <- c("uniform_1", "groups_1", "individual") %>% last()
## switch(1, ...) selects the first alternative; change the index to pick
## a different disp-prior configuration.
values_disp <- switch(1,
c(0.01), # 0.125 is a good value
c(0.01, 0.1), #, 0.25), # used in exploring model behavior
list(0.1, "flat"), #, 0.1, 1) # doesn't work yet
c("flat"))
flags_disp <- c("uniform_1", "uniform_2", "groups_1", "individual")
## whether to filter males with large disp values estimated using one piece model
filter_male <- c(TRUE, FALSE)
## NOTE(review): `data` is not defined at this point, so length(data) takes the
## length of the base function `data` (i.e. 1). Probably intended
## nrow(data_tbl) -- verify; `N` does not appear to be used below.
N <- length(data)
## define full table a priori
## crossing() builds the Cartesian product of all settings, with list-columns
## preallocated (as NA placeholders) for fits, stats, plots, and loo results.
fit_tbl_crossed <- crossing(
model = models,
#sampling_dist = sampling_dists,
x0_flag = flags_x0,
y0_flag = flags_y0,
disp_flag = flags_disp,
disp_value = values_disp,
filter_male = filter_male,
fit = list(NA),
stats_fit = list(NA),
fit_best = list(NA),
fit_top = list(NA),
plots = list(list()),
llik = list(NA),
r_eff = list(NA),
loo = list(NA)
)
if(fit_tbl_initiate_crossed) {
fit_tbl <- fit_tbl_crossed
} else {
## Use an empty tibble
fit_tbl <- fit_tbl_crossed[0, ]
}
Note: `results=...` was added to the chunk header to allow use of
\clearpage. For more details, see: https://stackoverflow.com/a/48069427/5322644
The chunk options used were: results='asis', eval=(knitr::opts_knit$get('rmarkdown.pandoc.to') == 'latex'), echo = TRUE
## Run/plot settings: interactive sessions refit and explore; knitr renders
## reuse previously saved fits and display everything.
if(interactive()) {
## Run Settings
run_fits <- TRUE
## run_fits <- FALSE
save_fits <- FALSE
## save_fits <- TRUE
run_fits_force <- TRUE # force refitting of model; used with run_fits = TRUE
load_fits <- FALSE # reload fit_tbl even if it already exits; used with run_fits = FALSE
verbose <- TRUE
## Fit Settings
render_plots <- TRUE ## Build or use previously generated plots in tbl
render_plots_force <- TRUE ## Force regeneration of plots if they already exist
render_hex <- TRUE
render_hist <- TRUE
render_pairs <- FALSE
off_diag_fun = "hex"
display_plots <- FALSE ## Display plots to local device?
save_plots_file <- TRUE ## Save plots to file?
} else {
## Non-interactive (Knitr) settings
run_fits <- FALSE
save_fits <- FALSE
## save_fits <- TRUE
run_fits_force <- FALSE # force refitting of model; used with run_fits = TRUE
load_fits <- TRUE # load fit_tbl below?
verbose <- TRUE
## Fit Settings
render_plots <- TRUE ## Build or use previously generated plots in tbl
render_plots_force <- TRUE ## Force regeneration of plots if they already exist
render_hex <- TRUE
render_hist <- TRUE
render_pairs <- TRUE
off_diag_fun <- "hex"
display_plots <- TRUE
save_plots_file <- TRUE
}
## Print settings
print_get_prior <- TRUE ## Print generic priors, prior to fit.
print_prior_summary <- TRUE ## Print actual priors, post fitting
## Locate the most recent saved fit_tbl (unless forcing a refit).
## NOTE(review): `output_dir` is not defined in this chunk; presumably set by
## the sourced local functions file -- verify.
if(run_fits_force) {
infile_tbl <- NULL
} else {
infile <- last(dir(file.path(output_dir, "tibbles"), "fit_tbl.*"))
## over ride latest file below
#infile <- "fit_tbl_adapt-delta-0.90.Rda"
infile_tbl <- file.path(output_dir, "tibbles", infile)
if(load_fits) {
print(paste0("Loading `infile_tbl` = ", infile_tbl))
load(infile_tbl)
#fit_tbl[["plots"]] <- list()
}
}
## [1] "Loading `infile_tbl` = output/render/tibbles/fit_tbl_2023-05-09_14.09.19.Rda"
## Timestamped output path for saving the fit table at the end of a run.
if(save_fits) {
cur_time <- gsub(" ", "_", Sys.time()) %>% gsub(":", ".", .)
outfile_tbl <- file.path(output_dir, "tibbles", paste0("fit_tbl_", cur_time, ".Rda"))
} else {
outfile_tbl <- NA ## NULL causes an error when trying to print
}
print_pairs_plot <- FALSE # Could base on model used, gets large when lots of individual or groups
## Sampling configuration for the main loop below.
sampling = "nbinom_type1"
prior_shape_y0 = "flat"
n_chains <- 10
n_cores <- n_chains
n_chains_top <- 4 # how many to keep for model analysis
## Subsets of the full flag grids actually iterated over in the fitting loop.
flags_x0_used <- c("individual", "groups_1", "uniform_1") # %>% rev()#
flags_y0_used <- c("individual")
values_disp_used <- values_disp
flags_disp_used <- c("individual", "groups_1", "groups_2", "uniform_1", "uniform_2")[4:5] # |> rev() |> first()
models_used <- c("one_piece", "two_piece", "asymptotic")[2] #c("one_piece", "two_piece") #, "two_piece")
shape_y0_prior <- "flat" # flat or normal
## These males produce bimodal posteriors and interfere with model fitting
## Ideally, we'd do a preliminary analysis without them and then include them later.
#male_filter = c("T235", "T236")
## These males have huge dispersion outside the predicted values
male_disp_high <- c("T235", "T243")
male_x0_high <- NA ## Define later
male_filter_out <- c("T235", "T243")
curr_row <- 1
## Main fitting loop: crosses model form x male filter x disp grouping x
## x0 grouping x y0 grouping x disp prior value. For each combination it
## either fits a brms model (run_fits) or reuses a cached fit, then extracts
## best/top chains and renders diagnostic plots into fit_tbl.
for(model in models_used) {
print(model)
## Per-model sampler settings: Stan mean function, warmup/iter, thinning,
## and adapt_delta.
switch(model,
two_piece = {
## Note issues in non-convergence are related to bimodality of posterior surface.
stan_func <- stan_two_piece_func
warmup <- 30000 # floor(3/4 * iter)
iter <- warmup + 10000
adapt_delta <- 0.9 ## increasing from 0.9 to 0.99 didn't help eliminate divergence
thin <- 5
},
one_piece = {
stan_func <- stan_one_piece_func
warmup <- 2000
iter <- warmup + 2000
thin <- 4
adapt_delta <- 0.7
},
asymptotic = {
stan_func <- stan_asymptotic_func
warmup <- 2000
iter <- warmup + 2000
thin <- 4
adapt_delta <- 0.9
}
)
cat(stan_func)
for(male_filter in c(FALSE)) { #, TRUE)) {
for(disp_flag in flags_disp_used) {
for(x0_flag in flags_x0_used) {
for(y0_flag in flags_y0_used) {
## define variable for labeling figures
x0_label <- ifelse(model == "one_piece", "NA", x0_flag)
for(disp_value in values_disp_used) {
## Set up variables for saving model and fit
desc_short <- paste0("x0: " , x0_label, "; y0: ", y0_flag, "; disp_flag: ", disp_flag, "; disp prior: ", disp_value, "; filter: ", male_filter)
desc <- paste0(sampling, "; ", model, "; ", desc_short)
#print(desc)
## Sanitize the description into a filesystem-safe model/file name.
desc_filename <- gsub("_|\\.", "-", desc) %>%
gsub("; ", "_", .) %>%
gsub(":? ", "-", .)
#stan_model_name <- sub(desc_filename, "_disp-prior-[0-9.]+_filter", "_filter")
stan_model_name <- desc_filename #sub("_disp-prior-[0-9.]+_", "_", desc_filename)
if(run_fits) {
## NOTE(review): length(fit_tbl) is the number of COLUMNS of the tibble;
## comparing it to the row index curr_row looks like it should be
## nrow(fit_tbl) >= curr_row -- verify. Also ifelse() used purely for
## its assignment side effects; a plain if/else would be clearer.
ifelse(length(fit_tbl) >= curr_row, fit_prerun <- fit_tbl[[curr_row, "fit"]][[1]], fit_prerun <- "NA")
if(run_fits_force | !is_brmsfit(fit_prerun)) {
print("Fitting Models")
switch(sampling,
"nbinom_type1"= {
family <- nbinom_type1(link = "identity", link_disp = "identity")
adapt_delta <- adapt_delta #0.95 ## will decreasing value increase ESS? Seems like it
iter <- iter
warmup <- warmup
thin <- thin
n_cores <- n_cores ## set to 1 if getting errors from stan in order to see relevant message.
n_chains <- n_chains
## Combine the model's mean function with the custom family's Stan code
stanvar_func <-
stanvar(scode = paste(
stan_func,
stan_nbinom_type1, sep = "\n"),
block = "functions")
}
)
## Refresh data in case x0_group or y0_group are all set to 1
data <- data_tbl
if(model == "one_piece") data <- data %>% filter(x < xignore)
if(male_filter) data <- data %>%
filter(!(male %in% male_filter_out))
print("Set grouping flags and formula")
## For each nonlinear parameter, add a *_group column to the data and
## build the corresponding model formula from its grouping flag.
for(flag_prefix in c("x0", "y0", "disp")) {
group_txt <- paste0(flag_prefix, "_group")
flag_txt <- paste0(flag_prefix, "_flag")
flag <- eval(parse(text = flag_txt))
## Map from flags to more general categories below: "single", "double"...
switch_male <- which_switch_male(flag)
data <- switch(switch_male,
"single" = {
mutate(data, tmp_group = 1)
},
"double" = {
male_high <- eval(parse(text=paste0("male_", flag_prefix, "_high")))
mutate(
data,
tmp_group =
as.factor(if_else(male %in% male_high, 2, 1)))
},
"individual" = {
mutate(data, tmp_group = 1) #tmp_group = male)
},
NA
) %>%
rename(!!group_txt := tmp_group)
## Define model formulas
flag_txt <- paste0(flag_prefix, "_flag")
flag <- eval(parse(text = flag_txt))
print(paste0(flag_prefix, ": ", flag))
flag_str <- paste0(flag_prefix, "_group")
string_formula <- switch(flag,
uniform_1 = paste0(flag_prefix, " ~ 0 + Intercept"),
## Doesn't work: paste0(flag_prefix, " ~ 0 + ", flag_str),
uniform_2 = paste0(flag_prefix, " ~ 0 + ", flag_str),
## `0 + Intercept` avoids prior being defined on centered data
## The following formulation is incorrect and
## assumes that x0 is centered around 0
## groups_1 = formula(x0 ~ 0 + (1|male)),
groups_1 = paste0(flag_prefix, " ~ 0 + Intercept + (1|male)"),
##paste0(flag_prefix, " ~ 0 + ", flag_str, " + (", flag_str, "|male)"),
groups_2 = paste0(flag_prefix, " ~ 0 + Intercept + (1|male) + ", flag_str),
##paste0(flag_prefix, " ~ 0 + (", flag_str, "|male)"),
individual = paste0(flag_prefix, " ~ 0 + male"), ## Do not use 1 + male!
NA
)
form_txt <- paste0(flag_prefix, "_form")
assign(form_txt, as.formula(string_formula))
}
## str_extract_all(names(data), ".*_flag") %>%
## unlist() %>%
## print()
## Used in asymptotic model: use phi instead of threshold x0
phi_form <- formula(deparse(x0_form) %>% gsub("x0", "phi", .))
threshold_form <- switch(model,
two_piece = x0_form,
one_piece = NULL,
asymptotic = phi_form
)
## Nonlinear brms formula: response mean given by the Stan function,
## plus the per-parameter formulas built above.
nlform <- switch(model,
two_piece = bf(y ~ two_piece(x, x0, y0), nl = TRUE),
one_piece = bf(y ~ one_piece(y0), nl = TRUE),
asymptotic = bf(y ~ asymp(x, phi, y0), nl = TRUE)
) +
threshold_form +
disp_form +
y0_form
print("Define priors")
## pass disp_value via stanvar argument
stanvar_prior <- stanvar(disp_value, name = "disp_value")
prior_string <- if(disp_value == "flat") {
"uniform(0, 20)"
} else {
# encode non-flat prior here, which force recompiling when disp_value changes
#paste0("exponential(", disp_value, ")")
# pass disp_value via stanvar argument
# Allows disp_value to be changed w/o recompiling
"exponential(disp_value)"
}
disp_priors <- switch(disp_flag,
#uniform_1 = set_prior(prior_string, class = "disp", lb = 0, ub = 20),
## Form when disp ~ 1:
uniform_1 = set_prior(prior_string, class = "b", dpar = "disp", lb = 0, ub = 20),
uniform_2 = NULL, ## This is probably broken
## NOTE(review): sd prior bounds (lb = 0.4) do not match the stated
## uniform(0.1, 5) distribution -- verify which is intended.
groups_1 = set_prior(prior_string,
class = "b", dpar = "disp", lb = 0, ub = 20) +
set_prior("uniform(0.1, 5)", class = "sd", dpar = "disp", lb = 0.4, ub = 5),
groups_2a = NULL,
groups_2b = NULL,
individual = set_prior(prior_string,
dpar = "disp", lb = 0, ub = 20)
)
## x0 only used in two_piece model
x0_prior <- switch(x0_flag,
uniform_1 = NULL,
uniform_2 = NULL,
groups_1 = prior(student_t(3, 0, 66.7), lb = 0, ub = 10, class = "sd", nlpar = "x0"),
groups_2 = NULL,
individual = NULL
)
x0_priors <- prior(uniform(32, 45.9), lb = 32, ub = 45.9, nlpar = "x0") +
#prior(uniform(32, 45.9), lb = 32, ub = 45.9, nlpar = "x0") +
x0_prior
## NOTE(review): bounds (lb = 0.01, ub = 17) do not match the stated
## uniform(0.1, 100) distribution -- verify which is intended.
phi_priors <- prior(uniform(0.1, 100), lb = 0.01, ub = 17, nlpar = "phi")
y0_priors <- switch(prior_shape_y0,
## Values based on calculations at top of file using `temp_threshold`,
normal = prior(normal(125, 500), nlpar = "y0", lb = 10, ub = 1000),
# flat prior
# - consistent with fact we're working with motifs, not songs
# - avoids bimodal posterior sampling issues with T235 and 236
flat = prior(uniform(10, 1000), nlpar = "y0", lb = 10, ub = 1000)
)
threshold_priors <- switch(model,
two_piece = x0_priors,
one_piece = NULL,
asymptotic = phi_priors
)
## Unnamed final argument acts as switch()'s default branch.
prior <- switch(model,
one_piece = {
y0_priors + disp_priors
},
threshold_priors + y0_priors + disp_priors
)
if(print_get_prior) {
tmp <- get_prior(nlform,
data = data,
family = family
)
print(tmp,
max.print = 500)
}
stan_code <- file.path(output_dir,
"stan", "code", paste0(stan_model_name, ".stan"))
## make_stancode( .... save_model = stan_code)
fit <- brm(nlform,
data = data,
## `link` refers to the mapping of the expectation of the distribution: log, sqrt, identity, softplus
## link_shape corresponds to `phi` of `stan`'s
## Negbinomial_2
## Defining `phi = mu/theta` creates a quasipoisson
## distribution with overdispersion parameter (1 +theta)
family = family, #negbinomial(link = "identity", link_shape = "identity"),
prior = prior,
stanvar = stanvar_func + stanvar_prior, ## pass prior values here
iter = iter,
warmup = warmup,
thin = thin,
silent = ifelse(interactive(), 1, 2), # 0, 1, or 2. 1 is default
control = list(adapt_delta = adapt_delta,
max_treedepth = 12
##model_name = desc ## Incorrect way to set this.
),
## Ideally save model to avoid need to recompile
stan_model_args = list(
model_name = file.path(output_dir, "stan", "binary", stan_model_name)
),
#sample_prior = "no", ## note improper priors not sampled
## Only print out sampling progress if in interactive mode
refresh = ifelse(interactive(),max(iter/5, 1), 0),
chains = n_chains,
cores = n_cores,
save_model = stan_code
)
## We should repeatedly refit model until we get desired number of fit_best
#print("Prior Summary")
#print(prior_summary(fit))
#print("Fit Information")
#print(desc)
#print(fit)
#fit_exp <- expose_functions(fit) , vectorize = TRUE)
#fit_cr <- add_criterion(fit_exp, c("loo", "waic"))
## Cache the fit and its settings in the current row of fit_tbl
fit_tbl[[curr_row, "fit"]] <- list(fit)
fit_tbl[[curr_row, "x0_flag"]] <- x0_flag
fit_tbl[[curr_row, "y0_flag"]] <- y0_flag
fit_tbl[[curr_row, "disp_flag"]] <- disp_flag
fit_tbl[[curr_row, "disp_value"]] <- disp_value
## Print current warnings
## NOTE(review): warnings(summary) passes `summary` as an argument to
## warnings(); plain warnings() is probably what was intended -- verify.
warnings(summary)
## Clear warnings()
} else {
print("Using pre existing fit")
fit <- fit_prerun
}
## Extract up to `n_chains_top` chains for fit_best and fit_top
verify_brmsfit(fit)
fit_brms <- fit
fit_stan <- fit_brms$fit
stats_fit <- calc_fit_lp_stats(fit_brms)
print(stats_fit %>% arrange(desc(mean)))
fit_tbl[[curr_row, "stats_fit"]][[1]] <- stats_fit
## Get all fits similar to the best one
## DOESN'T WORK CONSISTENTLY
## There's an issue with the local.functions code
## not working with both stanfit and brmsfit objects
## It's very confusing
make_stan_best <- TRUE
if(make_stan_best) {
fit_stan_best <- keep_chains_top(fit_stan, n_chains_max = n_chains_top, verbose = TRUE, best_only = TRUE)
## use existing brms object to update
fit_best <- fit_brms
fit_best$fit <- fit_stan_best
## update tbl
##str(fit)
fit_tbl[[curr_row, "fit_best"]][[1]] <- fit_best
}
## DOESN'T WORK CONSISTENTLY
## There's an issue with the local.functions code
make_stan_top <- TRUE
if(make_stan_top) {
## Get the top fits
fit_stan_top <- keep_chains_top(fit_stan, n_chains_max = n_chains_top, verbose = FALSE, best_only = FALSE)
## Use current fit object to construct new one
fit_top <- fit_brms
fit_top$fit <- fit_stan_top
## update tbl
## NOTE(review): this stores the raw stanfit (fit_stan_top), not the
## brmsfit wrapper fit_top built above -- verify which was intended.
fit_tbl[[curr_row, "fit_top"]][[1]] <- fit_stan_top
}
## end if(run_fits)
} else {
print("Working with Pre-existing Fits")
## Try to assign from local memory.
## NOTE(review): `|` is not short-circuiting, so nrow(fit_tbl) is
## evaluated (and errors) when fit_tbl does not exist; `||` would
## short-circuit -- verify.
if(!exists("fit_tbl") | nrow(fit_tbl) == 0) {
print(paste("Loading Models from file", infile_tbl))
load(file = infile_tbl, verbose = TRUE)
## Need only for older fittings
fit_tbl <- add_column_safely(fit_tbl, "plots")
}
fit <- fit_tbl[[curr_row, "fit"]][[1]]
print(paste0("`class(fit)` = ", class(fit)))
fit_stan <- fit$fit
## These should be brmsfit objects
fit_top <- fit_tbl[[curr_row, "fit_top"]][[1]]
fit_best <- fit_tbl[[curr_row, "fit_best"]][[1]]
stats_fit <- fit_tbl[[curr_row, "stats_fit"]][[1]]
} ## end loading of previous fits
cat("\n\n\n")
## lp_stats <- calc_fit_lp_stats(fit)
if(print_prior_summary) {
print(desc)
print("Prior Information")
print(prior_summary(fit)) # %>% filter(nlpar!="y0"))
}
print(desc)
print("Fit Information")
print(summary(fit)) #, pars = "x0*")) %>% filter(nlpar!="y0"))
print(stats_fit)
## Clean up variable names
fit_stan_rename <-
fit_stan %>%
clean_var_names()
data <- fit$data
vars_clean <- names(fit_stan_rename) %>% na.omit(.)
male_vec <- unique(data$male) %>% as.character(.)
## get male specific vars (start with "T")
vars_T <- grep("^T[0-9]{3}", vars_clean, value = TRUE)
vars_Intercept <- grep("Intercept", vars_clean, value = TRUE)
vars_non_T <- vars_clean[!(vars_clean %in% c(vars_T, vars_Intercept))]
drop_lprior <- TRUE
if(drop_lprior) vars_non_T <- str_subset(vars_non_T, "lprior", negate = TRUE)
#print(vars_clean)
## Get plots for current fit settings
plots_row <- fit_tbl[[ curr_row, "plots"]][[1]]
if(verbose) print("Plotting Trace")
## indicate plot name
plot_name <- "plot_trace"
plot_curr <- plots_row[[plot_name]]
if( (is.null(plot_curr) | render_plots_force) & render_plots) { ## Generate and append plot
## Plotting code goes below here
plot_trace <- mcmc_trace(fit, pars = c("lp__")) +
ggtitle(desc_short)
## Finish plotting code
plots_row[[plot_name]] <- plot_trace
} else{
## update last_plot() settings
set_last_plot(plot_trace)
}
if(verbose) print("Plotting violin")
## indicate plot name
plot_name <- "plot_violin"
plot_curr <- plots_row[[plot_name]]
if( (is.null(plot_curr) | render_plots_force) & render_plots) { ## Generate and append plot
## Plotting code goes below here
## Plot violin plots of posterior values
## Based on: https://cran.r-project.org/web/packages/bayesplot/vignettes/plotting-mcmc-draws.html#n_chains <-
fit_array <- as.array(fit_stan_rename)
if(FALSE) { ## use violin plots
plot_tmp <- mcmc_violin(fit_array, pars = c("lp_"), probs = c(0.1, 0.5, 0.9))
} else { # use histograms
plot_tmp <- mcmc_hist_by_chain(fit_array, pars = c("lp_")) #, color_chains = TRUE)
}
plot_violin <- plot_tmp +
yaxis_ticks(on = TRUE) +
yaxis_text(on = TRUE) +
ggtitle(desc_short)
## Finish plotting code
plots_row[[plot_name]] <- plot_violin
} else{
## update last_plot() settings
set_last_plot(plot_violin)
}
plot_trace_and_violin <- plot_grid(plotlist = list(plot_trace, (plot_violin + ggtitle(NULL))),
ncol = 1,
rel_heights=c(0.3, 1))
if(save_plots_file) last_plot_save(file_prefix = "trace-and-violin")
if(display_plots) print(last_plot())
## Count occurrence of each male in model fit_brms
## This is used to control plotting
male_instance <- sapply(male_vec, function(x) {sum(str_detect(x, string=vars_clean))})
if(render_hist) {
if(verbose) print("Plotting hist")
## This one can be replace by plot_hex or scatter
plot_name <- "plot_hist"
plot_curr <- plots_row[[plot_name]]
if( (is.null(plot_curr) | render_plots_force) & render_plots) { ## Generate and append plot
vars_fit <- names(fit_stan_rename) %>% na.omit(.) %>% sort(., decreasing = TRUE)
ncol <- 4
## Plotting code goes below here
plot_hist <- stan_hist(fit_stan_rename,
pars = vars_fit,
bins = 30,
ncol = ncol) +
ggtitle(desc_short) +
## Reduce plot label size
theme(plot.title = element_text(size = rel(1), face = "bold"))
## Finish plotting code
plots_row[[plot_name]] <- plot_hist
} else{
## update last_plot() settings
set_last_plot(plot_hist)
}
file_prefix <- sub("plot_", "", plot_name)
if(save_plots_file) last_plot_save(file_prefix = file_prefix)
if(display_plots) print(last_plot())
}
# remove old scatter plots
# plots_row[["plot_scatter"]] <- NULL
if(render_hex) {
plot_name <- "plot_hex"
plot_curr <- plots_row[[plot_name]]
if( (is.null(plot_curr) | render_plots_force) & render_plots) { ## Generate and append plot
print("Plotting Hex")
scatter_list <- list()
## One hex plot per male; pair the male's two parameters, or pair its
## single parameter with lp_ when only one exists.
for(male in male_vec) {
# print(male)
vars_male <- grep(male, vars_T, value = TRUE)
# mutate(x = gsub("T[0-9]{3}_","", x), y = gsub("T[0-9]{3}_","", y))
if(length(vars_male) == 1) {
## include lp
vars_male <- c(vars_male, "lp_")
ylab <- "log(Posterior)"
} else {
if(length(vars_male) > 2) {
print(
paste0("WARNING: mcmc_hex can only work with 2 variables, but length(vars_male) = ",
length(vars_male),
"; vars_male = ",
paste(vars_male),
" Using only first two elements.",
collapse = ","))
vars_male <- vars_male[1:2]
}
ylab <- "y0"
}
scatter_tmp <- mcmc_hex( #was mcmc_scatter
fit_stan_rename,
## Can only use two variables
pars = c(first(vars_male), last(vars_male)),
) +
##labs(x = "x0", y = ylab, title = male)
labs(title = male)
## Add histograms to plot axes/margin
## ggMarginal doesn't work natively with mcmc_hex, so we need to make the
## points transparent and then add a hex layer
scatter_list[[male]] <- ggExtra::ggMarginal(scatter_tmp +
geom_point(col="transparent") +
geom_hex() +
theme(legend.position = "none"), type = "histogram")
}
p <- plot_grid(plotlist = scatter_list,
ncol = 3) #, legend = TRUE)
title_row <- ggdraw() + draw_label(desc_short, fontface='bold', size = 10)
plot_hex <- plot_grid(title_row, p, ncol = 1, rel_heights=c(0.1, 1))
## Finish plotting code
plots_row[[plot_name]] <- plot_hex
} else {
## update last_plot() settings
set_last_plot(plot_hex)
}
file_prefix <- sub("plot_", "", plot_name)
if(save_plots_file) last_plot_save(file_prefix = file_prefix)
if(display_plots) print(last_plot())
}
if(render_pairs) {
## plot_Pairs not that useful
## Might want to flag off
plot_name <- "plot_pairs"
plot_curr <- plots_row[[plot_name]]
if( (is.null(plot_curr) | render_plots_force) & render_plots) { ## Generate and append plot
print("Plotting Pairs")
## Pairs Plot
list_plot <- list()
if(model == "one_piece" & disp_flag != "individual") {
## each male only appears once in the one piece models
## Split males into groups of `splitby` so each pairs panel stays legible
splitby <- 3
nsplits <- max(nmales %/% splitby, 1)
tmp_stop <- (1:nsplits) * splitby
tmp_start <- tmp_stop - (splitby - 1)
for(i in 1:nsplits) {
tmp_range <- tmp_start[[i]]:tmp_stop[[i]]
list_plot[[i]] <- pairs(fit,
off_diag_fun = if_else(off_diag_fun == "hex", "hex", "scatter"),
variable = c(as.character(males[tmp_range]),
"disp"),
regex = TRUE)
}
list_plot[[nsplits+1]] <- pairs(fit,
variable = c("disp", "lprior", "lp_")
)
} else { # not one_piece disp_flag != individual
list_plot <- list()
for(male in male_vec) {
##print(male);
list_plot[[male]] <-
pairs(fit,
off_diag_fun = if_else(off_diag_fun == "hex", "hex", "scatter"),
variable = c(male, "lp_"),
regex = TRUE)
}
#ggtitle(desc_short)
}
#browser()
## Set number of males per row
ifelse(length(list_plot[[male]]) > 2, ncol_row <- 1, ncol_row <- 2)
## Set number of rows/page
ifelse(length(list_plot[[male]]) > 2,
ifelse(length(list_plot[[male]]) > 4,
nrow_page <- 3,
nrow_page <- 4),
nrow_page <- 5)
## Note this makes a list of plots
plot_pairs <- marrangeGrob(grobs = list_plot,
ncol = ncol_row,
nrow = nrow_page,
top = desc_short,
bottom = quote(paste("page", g, "of", npages))
)
## plot_pairs <- cowplot::plot_grid(
## plotlist = list_plot,
## ncol = 2)
## Finish plotting code
#set_last_plot(plot_pairs) ## Don'[t think marrangeGrob updates last_plot()
plots_row[[plot_name]] <- plot_pairs
} else {
## update last_plot() settings
set_last_plot(plot_pairs)
}
if(display_plots) print(plot_pairs); #dev.off()
file_prefix <- sub("plot_", "", plot_name)
## Need to update following line since marrangeGrob output isn't technically a plot
if(save_plots_file) last_plot_save(file_prefix = file_prefix)
## update plots_row
fit_tbl[[ curr_row, "plots"]][[1]] <- plots_row
}
#graphics.off()
# browser()
curr_row <- curr_row + 1
cat("\\clearpage")
} ## End disp_values
} ## end y0_flag
} ## end x0_flag
} ## end flag_disp
} ## end filter male
} ## end model_used
## [1] "two_piece"
## real two_piece(real x, real x0, real y0) {
## real xmax = 46; ## paste in value for xmax
## real y;
##
## if(x0 > xmax) {
## y = log(0);
## } else {
## y = y0 * (xmax - fmax(x0, x))/(xmax - x0);
## }
## return(y);
## }
## [1] "Working with Pre-existing Fits"
## [1] "`class(fit)` = brmsfit"
##
##
##
## [1] "nbinom_type1; two_piece; x0: individual; y0: individual; disp_flag: uniform_1; disp prior: 0.01; filter: FALSE"
## [1] "Prior Information"
## prior class coef group resp dpar nlpar lb ub source
## uniform(32, 45.9) b x0 32 45.9 user
## uniform(32, 45.9) b maleT234 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT235 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT236 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT237 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT243 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT244 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT246 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT247 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT257 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT258 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT260 x0 32 45.9 (vectorized)
## uniform(10, 1000) b y0 10 1000 user
## uniform(10, 1000) b maleT234 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT235 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT236 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT237 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT243 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT244 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT246 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT247 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT257 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT258 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT260 y0 10 1000 (vectorized)
## (flat) b disp default
## (flat) b disp_group1 disp (vectorized)
## (flat) b disp_group2 disp (vectorized)
## [1] "nbinom_type1; two_piece; x0: individual; y0: individual; disp_flag: uniform_1; disp prior: 0.01; filter: FALSE"
## [1] "Fit Information"
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Family: nbinom_type1
## Links: mu = identity; disp = identity
## Formula: y ~ two_piece(x, x0, y0)
## x0 ~ 0 + male
## disp ~ 0 + disp_group
## y0 ~ 0 + male
## Data: data (Number of observations: 107)
## Draws: 10 chains, each with iter = 40000; warmup = 30000; thin = 5;
## total post-warmup draws = 20000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## x0_maleT234 41.07 0.55 39.85 41.98 1.05 122 7609
## x0_maleT235 44.67 2.01 38.72 45.87 1.62 30 10
## x0_maleT236 39.32 3.15 36.65 45.72 1.44 20 NA
## x0_maleT237 38.51 1.03 36.46 40.31 1.25 65 5771
## x0_maleT243 43.84 0.98 41.90 45.72 1.10 61 3012
## x0_maleT244 44.99 0.56 43.82 45.84 1.23 199 2610
## x0_maleT246 45.74 0.08 45.61 45.89 1.23 71 5009
## x0_maleT247 43.60 0.57 42.01 44.49 1.06 111 9496
## x0_maleT257 44.07 0.99 42.51 45.79 1.10 71 2462
## x0_maleT258 37.78 1.90 33.94 41.12 1.10 63 8719
## x0_maleT260 43.59 1.40 40.97 45.74 1.06 173 12278
## y0_maleT234 52.88 3.44 45.88 60.12 1.44 17841 3941
## y0_maleT235 155.14 15.43 130.41 184.77 1.16 42 15
## y0_maleT236 179.07 12.87 158.77 203.02 1.42 20 10
## y0_maleT237 151.83 10.99 133.87 178.29 1.23 162 2690
## y0_maleT243 138.69 14.61 112.98 170.66 1.23 120 3688
## y0_maleT244 76.88 3.67 69.64 84.70 1.04 371 12892
## y0_maleT246 37.07 4.71 27.37 45.61 1.07 89 4285
## y0_maleT247 111.19 4.68 102.53 121.49 1.24 221 3089
## y0_maleT257 231.79 11.68 211.46 253.58 1.13 48 14916
## y0_maleT258 47.73 6.75 35.51 62.67 1.03 197 7231
## y0_maleT260 86.74 6.85 74.20 99.77 1.10 63 8999
## disp_disp_group1 1.41 0.73 0.00 2.20 1.46 19 23
## disp_disp_group2 17.56 4.35 9.50 25.40 1.44 20 10
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## # A tibble: 10 × 5
## chains mean sd n se
## <int> <dbl> <dbl> <int> <dbl>
## 1 1 -598. 4.28 2000 0.0956
## 2 2 -598. 4.13 2000 0.0924
## 3 3 -598. 4.07 2000 0.0911
## 4 4 -598. 4.03 2000 0.0901
## 5 5 4509. 3.23 2000 0.0722
## 6 6 -598. 4.08 2000 0.0912
## 7 7 4548. 0.873 2000 0.0195
## 8 8 -598. 4.09 2000 0.0915
## 9 9 -598. 3.95 2000 0.0884
## 10 10 -598. 4.02 2000 0.0898
## [1] "Plotting Trace"
## [1] "Plotting violin"
## Warning: The `facets` argument of `facet_grid()` is deprecated as of ggplot2 2.2.0.
## ℹ Please use the `rows` argument instead.
## ℹ The deprecated feature was likely used in the bayesplot package.
## Please report the issue at <https://github.com/stan-dev/bayesplot/issues/>.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was generated.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## [1] "Plotting hist"
## Warning: The dot-dot notation (`..density..`) was deprecated in ggplot2 3.4.0.
## ℹ Please use `after_stat(density)` instead.
## ℹ The deprecated feature was likely used in the rstan package.
## Please report the issue at <https://github.com/stan-dev/rstan/issues/>.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was generated.
## [1] "Plotting Hex"
## [1] "Plotting Pairs"
## \clearpage[1] "Working with Pre-existing Fits"
## [1] "`class(fit)` = brmsfit"
##
##
##
## [1] "nbinom_type1; two_piece; x0: groups_1; y0: individual; disp_flag: uniform_1; disp prior: 0.01; filter: FALSE"
## [1] "Prior Information"
## prior class coef group resp dpar nlpar lb ub
## uniform(32, 45.9) b x0 32 45.9
## uniform(32, 45.9) b Intercept x0 32 45.9
## uniform(10, 1000) b y0 10 1000
## uniform(10, 1000) b maleT234 y0 10 1000
## uniform(10, 1000) b maleT235 y0 10 1000
## uniform(10, 1000) b maleT236 y0 10 1000
## uniform(10, 1000) b maleT237 y0 10 1000
## uniform(10, 1000) b maleT243 y0 10 1000
## uniform(10, 1000) b maleT244 y0 10 1000
## uniform(10, 1000) b maleT246 y0 10 1000
## uniform(10, 1000) b maleT247 y0 10 1000
## uniform(10, 1000) b maleT257 y0 10 1000
## uniform(10, 1000) b maleT258 y0 10 1000
## uniform(10, 1000) b maleT260 y0 10 1000
## (flat) b disp
## (flat) b disp_group1 disp
## (flat) b disp_group2 disp
## student_t(3, 0, 66.7) sd x0 0 10
## student_t(3, 0, 66.7) sd male x0 0 10
## student_t(3, 0, 66.7) sd Intercept male x0 0 10
## source
## user
## (vectorized)
## user
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## default
## (vectorized)
## (vectorized)
## user
## (vectorized)
## (vectorized)
## [1] "nbinom_type1; two_piece; x0: groups_1; y0: individual; disp_flag: uniform_1; disp prior: 0.01; filter: FALSE"
## [1] "Fit Information"
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Warning: There were 17998 divergent transitions after warmup. Increasing
## adapt_delta above 0.9 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: nbinom_type1
## Links: mu = identity; disp = identity
## Formula: y ~ two_piece(x, x0, y0)
## x0 ~ 0 + Intercept + (1 | male)
## disp ~ 0 + disp_group
## y0 ~ 0 + male
## Data: data (Number of observations: 107)
## Draws: 10 chains, each with iter = 40000; warmup = 30000; thin = 5;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~male (Number of levels: 11)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(x0_Intercept) 3.16 0.82 2.02 5.17 1.09 113 288
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## x0_Intercept 42.29 0.98 40.58 44.06 1.23 35 11
## y0_maleT234 52.07 3.72 45.77 59.67 1.09 83 536
## y0_maleT235 156.70 13.77 130.04 185.29 1.22 477 769
## y0_maleT236 179.51 11.45 156.78 200.02 1.24 31 10
## y0_maleT237 148.26 10.13 129.97 170.95 1.05 201 541
## y0_maleT243 138.76 16.72 110.14 173.36 1.09 75 673
## y0_maleT244 77.22 3.87 69.36 84.96 1.03 447 807
## y0_maleT246 37.89 6.12 27.54 49.49 1.22 33 638
## y0_maleT247 112.11 5.09 101.84 121.89 1.07 141 693
## y0_maleT257 230.42 11.82 207.77 254.22 1.08 82 763
## y0_maleT258 44.90 6.26 33.59 58.64 1.04 256 704
## y0_maleT260 88.36 7.76 74.47 101.40 1.22 34 732
## disp_disp_group1 1.57 0.57 0.00 2.22 1.29 27 15
## disp_disp_group2 19.15 2.78 14.45 25.42 1.05 142 278
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## # A tibble: 10 × 5
## chains mean sd n se
## <int> <dbl> <dbl> <int> <dbl>
## 1 1 -590. 4.58 2000 0.102
## 2 2 -590. 4.47 2000 0.100
## 3 3 -590. 4.04 2000 0.0903
## 4 4 -590. 4.35 2000 0.0972
## 5 5 -591. 4.09 2000 0.0914
## 6 6 -590. 4.35 2000 0.0973
## 7 7 -591. 3.87 2000 0.0866
## 8 8 -591. 3.89 2000 0.0869
## 9 9 -590. 5.71 2000 0.128
## 10 10 4543. 1.82 2000 0.0407
## [1] "Plotting Trace"
## [1] "Plotting violin"
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## [1] "Plotting hist"
## [1] "Plotting Hex"
## [1] "Plotting Pairs"
## \clearpage[1] "Working with Pre-existing Fits"
## [1] "`class(fit)` = brmsfit"
##
##
##
## [1] "nbinom_type1; two_piece; x0: uniform_1; y0: individual; disp_flag: uniform_1; disp prior: 0.01; filter: FALSE"
## [1] "Prior Information"
## prior class coef group resp dpar nlpar lb ub source
## uniform(32, 45.9) b x0 32 45.9 user
## uniform(32, 45.9) b Intercept x0 32 45.9 (vectorized)
## uniform(10, 1000) b y0 10 1000 user
## uniform(10, 1000) b maleT234 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT235 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT236 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT237 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT243 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT244 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT246 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT247 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT257 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT258 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT260 y0 10 1000 (vectorized)
## (flat) b disp default
## (flat) b disp_group1 disp (vectorized)
## (flat) b disp_group2 disp (vectorized)
## [1] "nbinom_type1; two_piece; x0: uniform_1; y0: individual; disp_flag: uniform_1; disp prior: 0.01; filter: FALSE"
## [1] "Fit Information"
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Warning: There were 1091 divergent transitions after warmup. Increasing
## adapt_delta above 0.9 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: nbinom_type1
## Links: mu = identity; disp = identity
## Formula: y ~ two_piece(x, x0, y0)
## x0 ~ 0 + Intercept
## disp ~ 0 + disp_group
## y0 ~ 0 + male
## Data: data (Number of observations: 107)
## Draws: 10 chains, each with iter = 40000; warmup = 30000; thin = 5;
## total post-warmup draws = 20000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## x0_Intercept 45.77 0.10 45.63 45.88 2.13 22 15
## y0_maleT234 45.85 2.40 41.42 50.24 1.73 23 14
## y0_maleT235 156.74 13.63 134.31 183.52 1.72 19 12
## y0_maleT236 157.15 3.72 148.59 165.03 1.63 34 121
## y0_maleT237 133.59 4.89 123.31 142.89 1.67 27 114
## y0_maleT243 130.11 11.27 111.68 151.38 1.44 22 113
## y0_maleT244 76.83 3.24 70.91 82.54 1.64 22 15
## y0_maleT246 43.30 6.32 29.46 51.22 2.45 12 123
## y0_maleT247 105.12 4.11 97.55 110.58 1.77 18 14
## y0_maleT257 232.80 10.45 215.89 249.09 1.61 19 15
## y0_maleT258 32.45 3.24 25.53 37.76 1.84 23 113
## y0_maleT260 88.34 5.12 76.97 96.99 1.77 26 115
## disp_disp_group1 0.59 0.81 0.00 2.10 3.01 11 14
## disp_disp_group2 19.44 2.36 15.68 24.09 1.60 22 13
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## # A tibble: 10 × 5
## chains mean sd n se
## <int> <dbl> <dbl> <int> <dbl>
## 1 1 4478. 2.33 2000 0.0521
## 2 2 -579. 15.5 2000 0.347
## 3 3 -596. 2.80 2000 0.0627
## 4 4 -596. 2.86 2000 0.0640
## 5 5 -596. 2.88 2000 0.0643
## 6 6 4476. 1.51 2000 0.0337
## 7 7 4466. 0.444 2000 0.00993
## 8 8 4419. 6.11 2000 0.137
## 9 9 4472. 1.61 2000 0.0361
## 10 10 4494. 1.28 2000 0.0286
## [1] "Plotting Trace"
## [1] "Plotting violin"
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## [1] "Plotting hist"
## [1] "Plotting Hex"
## [1] "Plotting Pairs"
## \clearpage[1] "Working with Pre-existing Fits"
## [1] "`class(fit)` = brmsfit"
##
##
##
## [1] "nbinom_type1; two_piece; x0: individual; y0: individual; disp_flag: uniform_2; disp prior: 0.01; filter: FALSE"
## [1] "Prior Information"
## prior class coef group resp dpar nlpar lb ub source
## uniform(32, 45.9) b x0 32 45.9 user
## uniform(32, 45.9) b maleT234 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT235 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT236 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT237 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT243 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT244 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT246 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT247 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT257 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT258 x0 32 45.9 (vectorized)
## uniform(32, 45.9) b maleT260 x0 32 45.9 (vectorized)
## uniform(10, 1000) b y0 10 1000 user
## uniform(10, 1000) b maleT234 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT235 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT236 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT237 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT243 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT244 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT246 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT247 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT257 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT258 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT260 y0 10 1000 (vectorized)
## (flat) b disp default
## (flat) b disp_group disp (vectorized)
## (flat) b Intercept disp (vectorized)
## [1] "nbinom_type1; two_piece; x0: individual; y0: individual; disp_flag: uniform_2; disp prior: 0.01; filter: FALSE"
## [1] "Fit Information"
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Family: nbinom_type1
## Links: mu = identity; disp = identity
## Formula: y ~ two_piece(x, x0, y0)
## x0 ~ 0 + male
## disp ~ 0 + Intercept + disp_group
## y0 ~ 0 + male
## Data: data (Number of observations: 107)
## Draws: 10 chains, each with iter = 40000; warmup = 30000; thin = 5;
## total post-warmup draws = 20000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## x0_maleT234 41.08 0.49 39.90 41.93 1.46 487 876
## x0_maleT235 44.10 3.70 33.05 45.86 1.30 25 10
## x0_maleT236 40.07 3.55 36.70 45.66 1.68 16 NA
## x0_maleT237 38.25 0.91 36.56 40.17 1.13 137 958
## x0_maleT243 44.18 1.12 41.99 45.86 1.38 22 1047
## x0_maleT244 45.08 0.60 43.86 45.85 1.23 31 962
## x0_maleT246 45.77 0.09 45.61 45.90 1.34 23 1003
## x0_maleT247 43.70 0.55 42.07 44.49 1.28 79 952
## x0_maleT257 43.99 0.99 42.51 45.77 1.13 48 964
## x0_maleT258 38.00 1.68 34.00 40.99 1.37 110 909
## x0_maleT260 43.87 1.58 40.99 45.88 1.39 21 1128
## y0_maleT234 53.36 3.56 46.21 60.00 1.11 57 998
## y0_maleT235 163.16 26.15 130.56 231.37 1.40 27 NA
## y0_maleT236 176.42 13.98 155.05 202.49 1.66 16 10
## y0_maleT237 151.27 10.79 134.29 176.89 1.11 56 1009
## y0_maleT243 139.14 14.86 113.59 169.95 1.13 48 877
## y0_maleT244 77.39 3.48 69.86 84.46 1.18 154 970
## y0_maleT246 37.29 4.58 27.44 45.30 1.35 54 889
## y0_maleT247 111.45 4.27 102.87 121.09 1.55 768 943
## y0_maleT257 231.71 9.64 211.50 252.86 1.36 380 996
## y0_maleT258 48.31 6.25 35.88 62.08 1.09 190 799
## y0_maleT260 86.88 5.82 74.84 99.54 1.38 420 831
## disp_Intercept -14.09 3.86 -21.59 -7.73 1.48 19 1322
## disp_disp_group 15.32 4.42 7.73 23.33 1.58 17 10
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## # A tibble: 10 × 5
## chains mean sd n se
## <int> <dbl> <dbl> <int> <dbl>
## 1 1 -598. 4.20 2000 0.0939
## 2 2 -598. 4.16 2000 0.0930
## 3 3 4376. 0 2000 0
## 4 4 4244. 0 2000 0
## 5 5 -598. 3.98 2000 0.0889
## 6 6 -598. 3.98 2000 0.0890
## 7 7 -598. 4.06 2000 0.0908
## 8 8 4291. 0 2000 0
## 9 9 -598. 4.05 2000 0.0907
## 10 10 -598. 4.07 2000 0.0910
## [1] "Plotting Trace"
## [1] "Plotting violin"
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## [1] "Plotting hist"
## [1] "Plotting Hex"
## [1] "Plotting Pairs"
## \clearpage[1] "Working with Pre-existing Fits"
## [1] "`class(fit)` = brmsfit"
##
##
##
## [1] "nbinom_type1; two_piece; x0: groups_1; y0: individual; disp_flag: uniform_2; disp prior: 0.01; filter: FALSE"
## [1] "Prior Information"
## prior class coef group resp dpar nlpar lb ub
## uniform(32, 45.9) b x0 32 45.9
## uniform(32, 45.9) b Intercept x0 32 45.9
## uniform(10, 1000) b y0 10 1000
## uniform(10, 1000) b maleT234 y0 10 1000
## uniform(10, 1000) b maleT235 y0 10 1000
## uniform(10, 1000) b maleT236 y0 10 1000
## uniform(10, 1000) b maleT237 y0 10 1000
## uniform(10, 1000) b maleT243 y0 10 1000
## uniform(10, 1000) b maleT244 y0 10 1000
## uniform(10, 1000) b maleT246 y0 10 1000
## uniform(10, 1000) b maleT247 y0 10 1000
## uniform(10, 1000) b maleT257 y0 10 1000
## uniform(10, 1000) b maleT258 y0 10 1000
## uniform(10, 1000) b maleT260 y0 10 1000
## (flat) b disp
## (flat) b disp_group disp
## (flat) b Intercept disp
## student_t(3, 0, 66.7) sd x0 0 10
## student_t(3, 0, 66.7) sd male x0 0 10
## student_t(3, 0, 66.7) sd Intercept male x0 0 10
## source
## user
## (vectorized)
## user
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## (vectorized)
## default
## (vectorized)
## (vectorized)
## user
## (vectorized)
## (vectorized)
## [1] "nbinom_type1; two_piece; x0: groups_1; y0: individual; disp_flag: uniform_2; disp prior: 0.01; filter: FALSE"
## [1] "Fit Information"
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Warning: There were 9997 divergent transitions after warmup. Increasing
## adapt_delta above 0.9 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: nbinom_type1
## Links: mu = identity; disp = identity
## Formula: y ~ two_piece(x, x0, y0)
## x0 ~ 0 + Intercept + (1 | male)
## disp ~ 0 + Intercept + disp_group
## y0 ~ 0 + male
## Data: data (Number of observations: 107)
## Draws: 10 chains, each with iter = 40000; warmup = 30000; thin = 5;
## total post-warmup draws = 20000
##
## Group-Level Effects:
## ~male (Number of levels: 11)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(x0_Intercept) 2.46 1.21 0.93 4.95 2.34 13 10
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## x0_Intercept 43.15 1.25 40.92 45.23 1.89 14 59
## y0_maleT234 51.24 4.08 44.98 59.31 1.50 19 11
## y0_maleT235 152.04 16.86 118.05 181.65 1.74 22 10
## y0_maleT236 170.48 12.78 153.49 197.13 2.32 13 10
## y0_maleT237 142.68 11.77 125.16 169.25 1.67 16 10
## y0_maleT243 137.29 14.39 112.89 171.33 1.27 35 172
## y0_maleT244 77.05 3.78 70.85 84.91 1.29 31 161
## y0_maleT246 36.41 3.54 28.26 44.59 1.57 98 368
## y0_maleT247 109.59 5.23 98.74 119.53 1.47 24 11
## y0_maleT257 231.37 9.75 214.84 249.97 1.30 29 288
## y0_maleT258 42.95 7.09 33.24 58.40 1.56 18 11
## y0_maleT260 88.18 5.44 76.51 99.38 1.31 58 413
## disp_Intercept -13.76 4.71 -22.07 -6.68 1.94 14 NA
## disp_disp_group 14.63 5.26 6.68 23.81 1.97 14 10
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## # A tibble: 10 × 5
## chains mean sd n se
## <int> <dbl> <dbl> <int> <dbl>
## 1 1 4327. 0 2000 0
## 2 2 4093. 0 2000 0
## 3 3 4460. 0 2000 0
## 4 4 -589. 3.68 2000 0.0823
## 5 5 4146. 0 2000 0
## 6 6 -591. 4.35 2000 0.0972
## 7 7 -591. 4.03 2000 0.0900
## 8 8 -590. 4.09 2000 0.0914
## 9 9 -590. 4.48 2000 0.100
## 10 10 4131. 0 2000 0
## [1] "Plotting Trace"
## [1] "Plotting violin"
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## [1] "Plotting hist"
## [1] "Plotting Hex"
## [1] "Plotting Pairs"
## \clearpage[1] "Working with Pre-existing Fits"
## [1] "`class(fit)` = brmsfit"
##
##
##
## [1] "nbinom_type1; two_piece; x0: uniform_1; y0: individual; disp_flag: uniform_2; disp prior: 0.01; filter: FALSE"
## [1] "Prior Information"
## prior class coef group resp dpar nlpar lb ub source
## uniform(32, 45.9) b x0 32 45.9 user
## uniform(32, 45.9) b Intercept x0 32 45.9 (vectorized)
## uniform(10, 1000) b y0 10 1000 user
## uniform(10, 1000) b maleT234 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT235 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT236 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT237 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT243 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT244 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT246 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT247 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT257 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT258 y0 10 1000 (vectorized)
## uniform(10, 1000) b maleT260 y0 10 1000 (vectorized)
## (flat) b disp default
## (flat) b disp_group disp (vectorized)
## (flat) b Intercept disp (vectorized)
## [1] "nbinom_type1; two_piece; x0: uniform_1; y0: individual; disp_flag: uniform_2; disp prior: 0.01; filter: FALSE"
## [1] "Fit Information"
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
## Warning: There were 65 divergent transitions after warmup. Increasing
## adapt_delta above 0.9 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Family: nbinom_type1
## Links: mu = identity; disp = identity
## Formula: y ~ two_piece(x, x0, y0)
## x0 ~ 0 + Intercept
## disp ~ 0 + Intercept + disp_group
## y0 ~ 0 + male
## Data: data (Number of observations: 107)
## Draws: 10 chains, each with iter = 40000; warmup = 30000; thin = 5;
## total post-warmup draws = 20000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## x0_Intercept 45.71 0.17 45.01 45.89 1.08 84 667
## y0_maleT234 45.85 2.93 40.07 51.77 1.05 19343 16471
## y0_maleT235 157.62 14.43 129.29 186.95 1.02 468 18576
## y0_maleT236 156.51 5.52 145.92 167.87 1.01 718 16793
## y0_maleT237 133.06 6.34 120.53 145.93 1.14 18825 17022
## y0_maleT243 132.64 13.94 106.04 158.57 1.07 84 18399
## y0_maleT244 76.17 4.12 69.27 84.43 1.09 71 16367
## y0_maleT246 37.58 5.90 27.15 48.14 1.19 36 15671
## y0_maleT247 103.92 4.29 95.50 112.85 1.23 13414 11334
## y0_maleT257 232.33 10.65 210.87 253.37 1.02 1264 17772
## y0_maleT258 31.38 3.97 23.74 39.63 1.23 20034 19687
## y0_maleT260 87.09 6.55 73.95 100.16 1.02 1091 17190
## disp_Intercept -16.18 2.84 -22.65 -11.26 1.14 19619 19473
## disp_disp_group 17.70 2.90 13.04 24.33 1.03 211 19432
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## # A tibble: 10 × 5
## chains mean sd n se
## <int> <dbl> <dbl> <int> <dbl>
## 1 1 -596. 2.83 2000 0.0633
## 2 2 -596. 2.92 2000 0.0653
## 3 3 -596. 2.81 2000 0.0628
## 4 4 4382. 0 2000 0
## 5 5 -596. 2.83 2000 0.0634
## 6 6 -596. 2.71 2000 0.0605
## 7 7 -596. 2.89 2000 0.0646
## 8 8 -596. 2.88 2000 0.0643
## 9 9 -596. 2.78 2000 0.0622
## 10 10 -596. 2.87 2000 0.0641
## [1] "Plotting Trace"
## [1] "Plotting violin"
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## [1] "Plotting hist"
## [1] "Plotting Hex"
## [1] "Plotting Pairs"
## \clearpage
# rm("curr_row")
## Report the destination path of the fit table, persist the table when
## saving is enabled, then stop knitting the rest of the document.
print(outfile_tbl)
## [1] NA
if (save_fits) {
  save(fit_tbl, file = outfile_tbl)
}
knitr::knit_exit()